* Some old Linux versions, such as 2.4, assume physical memory
* exists in the second 64MB of the address space.
*/
- dom0_start = alloc_boot_pages(dom0_size,dom0_align);
+ dom0_start = alloc_boot_pages(
+ dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
+ dom0_start <<= PAGE_SHIFT;
if (!dom0_start) {
printf("construct_dom0: can't allocate contiguous memory size=%p\n",
dom0_size);
{
domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
printf("alloc_domU_staging: starting (initializing %d MB...)\n",domU_staging_size/(1024*1024));
- domU_staging_start= alloc_boot_pages(domU_staging_size,domU_staging_align);
+ domU_staging_start = alloc_boot_pages(
+ domU_staging_size >> PAGE_SHIFT, domU_staging_align >> PAGE_SHIFT);
+ domU_staging_start <<= PAGE_SHIFT;
if (!domU_staging_start) {
printf("alloc_domU_staging: can't allocate, spinning...\n");
while(1);
+
+ /* Request a contiguous chunk from the boot allocator, since
+ * the HV address space is identity mapped */
-+ p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
++ p = alloc_boot_pages(frame_table_size>>PAGE_SHIFT, FT_ALIGN_SIZE>>PAGE_SHIFT) << PAGE_SHIFT;
+ if (p == 0)
+ panic("Not enough memory for frame table.\n");
+
#define FT_ALIGN_SIZE (16UL << 20)
void __init init_frametable(void)
{
- unsigned long i, p;
+ unsigned long i, pfn;
frame_table_size = max_page * sizeof(struct pfn_info);
frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
/* Request a contiguous chunk from the boot allocator, since
* the HV address space is identity mapped */
- p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
- if (p == 0)
+ pfn = alloc_boot_pages(
+ frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
+ if (pfn == 0)
panic("Not enough memory for frame table.\n");
- frame_table = __va(p);
+ frame_table = __va(pfn << PAGE_SHIFT);
memset(frame_table, 0, frame_table_size);
printk("size of frame_table: %lukB\n",
frame_table_size >> 10);
* zeroes page to simulate the local APIC and another
* one for the IO-APIC.
*/
- if (!smp_found_config && detect_init_APIC()) {
- apic_phys = alloc_xenheap_page();
- apic_phys = __pa(apic_phys);
- } else
+ if (!smp_found_config && detect_init_APIC())
+ apic_phys = __pa(alloc_xenheap_page());
+ else
apic_phys = mp_lapic_addr;
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
}
} else {
fake_ioapic_page:
- ioapic_phys = alloc_xenheap_page();
- ioapic_phys = __pa(ioapic_phys);
+ ioapic_phys = __pa(alloc_xenheap_page());
}
set_fixmap_nocache(idx, ioapic_phys);
apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
break;
}
- l_arr = (unsigned long *)alloc_xenheap_page();
+ l_arr = alloc_xenheap_page();
ret = 0;
for( n = 0; n < num; )
n += j;
}
- free_xenheap_page((unsigned long)l_arr);
+ free_xenheap_page(l_arr);
put_domain(d);
}
void free_perdomain_pt(struct domain *d)
{
- free_xenheap_page((unsigned long)d->arch.mm_perdomain_pt);
+ free_xenheap_page(d->arch.mm_perdomain_pt);
#ifdef __x86_64__
- free_xenheap_page((unsigned long)d->arch.mm_perdomain_l2);
- free_xenheap_page((unsigned long)d->arch.mm_perdomain_l3);
+ free_xenheap_page(d->arch.mm_perdomain_l2);
+ free_xenheap_page(d->arch.mm_perdomain_l3);
#endif
}
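With alloc_xenheap_page() returning void * and free_xenheap_page() taking the pointer itself, the casts disappear on both sides. A minimal sketch of the typed round trip, using a page-table page as in the hunks above:

    l1_pgentry_t *pt = alloc_xenheap_page();   /* void *, NULL on failure */
    if ( pt == NULL )
        return -ENOMEM;
    memset(pt, 0, PAGE_SIZE);
    /* ... use pt ... */
    free_xenheap_page(pt);                     /* pointer, not a long */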
v->arch.schedule_tail = continue_nonidle_task;
- d->shared_info = (void *)alloc_xenheap_page();
+ d->shared_info = alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
v->cpumap = CPUMAP_RUNANYWHERE;
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
- d->arch.mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
+ d->arch.mm_perdomain_pt = alloc_xenheap_page();
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
v->arch.guest_vl3table = __linear_l3_table;
v->arch.guest_vl4table = __linear_l4_table;
- d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
+ d->arch.mm_perdomain_l2 = alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
- d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
+ d->arch.mm_perdomain_l3 = alloc_xenheap_page();
memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
/* Frame table and its size in pages. */
struct pfn_info *frame_table;
-unsigned long frame_table_size;
unsigned long max_page;
void __init init_frametable(void)
{
- unsigned long i, p, step;
+ unsigned long nr_pages, page_step, i, pfn;
- frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
- frame_table_size = max_page * sizeof(struct pfn_info);
- frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
+ frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
- step = (1 << L2_PAGETABLE_SHIFT);
- for ( i = 0; i < frame_table_size; i += step )
+ nr_pages = PFN_UP(max_page * sizeof(*frame_table));
+ page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
+
+ for ( i = 0; i < nr_pages; i += page_step )
{
- p = alloc_boot_pages(min(frame_table_size - i, step), step);
- if ( p == 0 )
+ pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
+ if ( pfn == 0 )
panic("Not enough memory for frame table\n");
map_pages_to_xen(
- FRAMETABLE_VIRT_START + i,
- p >> PAGE_SHIFT,
- step >> PAGE_SHIFT,
- PAGE_HYPERVISOR);
+ FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
+ pfn, page_step, PAGE_HYPERVISOR);
}
- memset(frame_table, 0, frame_table_size);
+ memset(frame_table, 0, nr_pages << PAGE_SHIFT);
}
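PFN_UP rounds a byte count up to whole page frames, which is what lets the loop index, the chunk size, and the map call all stay in pfn units. Assuming its usual definition (not shown in this hunk):

    /* Round a byte count (or address) up to page frames. */
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    /* With 4KB pages: PFN_UP(1) == 1, PFN_UP(4096) == 1, PFN_UP(4097) == 2.
     * page_step is then one superpage worth of pfns per allocated chunk
     * (e.g. 512 with 2MB superpages). */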
void arch_init_memory(void)
int ptwr_init(struct domain *d)
{
- void *x = (void *)alloc_xenheap_page();
- void *y = (void *)alloc_xenheap_page();
+ void *x = alloc_xenheap_page();
+ void *y = alloc_xenheap_page();
if ( (x == NULL) || (y == NULL) )
{
if ( x != NULL )
- free_xenheap_page((unsigned long)x);
+ free_xenheap_page(x);
if ( y != NULL )
- free_xenheap_page((unsigned long)y);
+ free_xenheap_page(y);
return -ENOMEM;
}
void ptwr_destroy(struct domain *d)
{
cleanup_writable_pagetable(d);
- free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_ACTIVE].page);
- free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_INACTIVE].page);
+ free_xenheap_page(d->arch.ptwr[PTWR_PT_ACTIVE].page);
+ free_xenheap_page(d->arch.ptwr[PTWR_PT_INACTIVE].page);
}
void cleanup_writable_pagetable(struct domain *d)
/* So we see what's up */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
- stack = (void *)alloc_xenheap_pages(STACK_ORDER);
+ stack = alloc_xenheap_pages(STACK_ORDER);
#if defined(__i386__)
stack_start.esp = (void *)__pa(stack);
#elif defined(__x86_64__)
rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
vmcs_size = vmx_msr_high & 0x1fff;
- vmcs = (struct vmcs_struct *) alloc_xenheap_pages(get_order(vmcs_size));
- memset((char *) vmcs, 0, vmcs_size); /* don't remove this */
+ vmcs = alloc_xenheap_pages(get_order(vmcs_size));
+ memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
vmcs->vmcs_revision_id = vmx_msr_low;
return vmcs;
int order;
 order = get_order(vmcs_size);
- free_xenheap_pages((unsigned long) vmcs, order);
+ free_xenheap_pages(vmcs, order);
}
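get_order() maps a byte size to the smallest power-of-two page count covering it, assuming the usual definition:

    /* get_order: smallest 'order' with (PAGE_SIZE << order) >= size.
     * With 4KB pages: get_order(1) == 0, get_order(4097) == 1,
     * get_order(16384) == 2. */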
static inline int construct_vmcs_controls(void)
if ( !early_boot )
{
- void *v = (void *)alloc_xenheap_page();
+ void *v = alloc_xenheap_page();
return ((v == NULL) ? NULL : virt_to_page(v));
}
void free_xen_pagetable(struct pfn_info *pg)
{
- free_xenheap_page((unsigned long)page_to_virt(pg));
+ free_xenheap_page(page_to_virt(pg));
}
l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
/* Create page tables for ioremap(). */
for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
{
- ioremap_pt = (void *)alloc_xenheap_page();
+ ioremap_pt = alloc_xenheap_page();
clear_page(ioremap_pt);
idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i] =
l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
/* Set up mapping cache for domain pages. */
mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
- mapcache = (l1_pgentry_t *)alloc_xenheap_pages(mapcache_order);
+ mapcache = alloc_xenheap_pages(mapcache_order);
memset(mapcache, 0, PAGE_SIZE << mapcache_order);
for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
idle_pg_table_l2[l2_linear_offset(MAPCACHE_VIRT_START) + i] =
struct pfn_info *alloc_xen_pagetable(void)
{
extern int early_boot;
- unsigned long p;
+ unsigned long pfn;
if ( !early_boot )
return alloc_domheap_page(NULL);
- p = alloc_boot_pages(PAGE_SIZE, PAGE_SIZE);
- return ((p == 0) ? NULL : phys_to_page(p));
+ pfn = alloc_boot_pages(1, 1);
+ return ((pfn == 0) ? NULL : pfn_to_page(pfn));
}
void free_xen_pagetable(struct pfn_info *pg)
idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
/* Create user-accessible L2 directory to map the MPT for guests. */
- l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
+ l3_ro_mpt = alloc_xenheap_page();
clear_page(l3_ro_mpt);
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
l4e_from_page(
virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
- l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page();
+ l2_ro_mpt = alloc_xenheap_page();
clear_page(l2_ro_mpt);
l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
l3e_from_page(
grant_table_destroy(d);
free_perdomain_pt(d);
- free_xenheap_page((unsigned long)d->shared_info);
+ free_xenheap_page(d->shared_info);
free_domain_struct(d);
grant_table_t *lgt = ld->grant_table;
/* Grow the maptrack table. */
- new_mt = (void *)alloc_xenheap_pages(lgt->maptrack_order + 1);
+ new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
if ( new_mt == NULL )
{
put_domain(rd);
for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
new_mt[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
- free_xenheap_pages((unsigned long)lgt->maptrack, lgt->maptrack_order);
+ free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
lgt->maptrack = new_mt;
lgt->maptrack_order += 1;
lgt->maptrack_limit <<= 1;
memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES);
/* Tracking of mapped foreign frames table */
- if ( (t->maptrack = (void *)alloc_xenheap_page()) == NULL )
+ if ( (t->maptrack = alloc_xenheap_page()) == NULL )
goto no_mem;
t->maptrack_order = 0;
t->maptrack_limit = PAGE_SIZE / sizeof(grant_mapping_t);
t->maptrack[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
/* Shared grant table. */
- t->shared = (void *)alloc_xenheap_pages(ORDER_GRANT_FRAMES);
+ t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
if ( t->shared == NULL )
goto no_mem;
memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);
{
xfree(t->active);
if ( t->maptrack != NULL )
- free_xenheap_page((unsigned long)t->maptrack);
+ free_xenheap_page(t->maptrack);
xfree(t);
}
return -ENOMEM;
{
/* Free memory relating to this grant table. */
d->grant_table = NULL;
- free_xenheap_pages((unsigned long)t->shared, ORDER_GRANT_FRAMES);
- free_xenheap_page((unsigned long)t->maptrack);
+ free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES);
+ free_xenheap_page(t->maptrack);
xfree(t->active);
xfree(t);
}
*/
/* Initialise allocator to handle up to @max_page pages. */
-unsigned long init_boot_allocator(unsigned long bitmap_start)
+physaddr_t init_boot_allocator(physaddr_t bitmap_start)
{
bitmap_start = round_pgup(bitmap_start);
return bitmap_start + bitmap_size;
}
-void init_boot_pages(unsigned long ps, unsigned long pe)
+void init_boot_pages(physaddr_t ps, physaddr_t pe)
{
unsigned long bad_pfn;
char *p;
}
}
-unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
+unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
{
unsigned long pg, i;
- size = round_pgup(size) >> PAGE_SHIFT;
- align = round_pgup(align) >> PAGE_SHIFT;
-
- for ( pg = 0; (pg + size) < (bitmap_size*8); pg += align )
+ for ( pg = 0; (pg + nr_pfns) < (bitmap_size*8); pg += pfn_align )
{
- for ( i = 0; i < size; i++ )
+ for ( i = 0; i < nr_pfns; i++ )
if ( allocated_in_map(pg + i) )
break;
- if ( i == size )
+ if ( i == nr_pfns )
{
- map_alloc(pg, size);
- return pg << PAGE_SHIFT;
+ map_alloc(pg, nr_pfns);
+ return pg;
}
}
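The allocator itself now works purely in pfn units: it scans the allocation bitmap for nr_pfns consecutive free frames at pfn_align granularity and returns the first pfn, with 0 signalling failure (which the == 0 checks at the call sites above rely on). Examples of the new units:

    pfn = alloc_boot_pages(1, 1);        /* one page, page-aligned        */
    pfn = alloc_boot_pages(512, 512);    /* 2MB, 2MB-aligned (4KB pages)  */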
* XEN-HEAP SUB-ALLOCATOR
*/
-void init_xenheap_pages(unsigned long ps, unsigned long pe)
+void init_xenheap_pages(physaddr_t ps, physaddr_t pe)
{
unsigned long flags;
ps = round_pgup(ps);
pe = round_pgdown(pe);
- memguard_guard_range(__va(ps), pe - ps);
+ memguard_guard_range(phys_to_virt(ps), pe - ps);
/*
* Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
}
-unsigned long alloc_xenheap_pages(unsigned int order)
+void *alloc_xenheap_pages(unsigned int order)
{
unsigned long flags;
struct pfn_info *pg;
pg[i].u.inuse.type_info = 0;
}
- return (unsigned long)page_to_virt(pg);
+ return page_to_virt(pg);
no_memory:
printk("Cannot handle page request order %d!\n", order);
- return 0;
+ return NULL;
}
-void free_xenheap_pages(unsigned long p, unsigned int order)
+void free_xenheap_pages(void *v, unsigned int order)
{
unsigned long flags;
- memguard_guard_range((void *)p, 1 << (order + PAGE_SHIFT));
+ memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
local_irq_save(flags);
- free_heap_pages(MEMZONE_XEN, virt_to_page(p), order);
+ free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
local_irq_restore(flags);
}
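The order argument is log2 of the page count and must match between allocation and free. A minimal round trip under the new typed interface:

    void *buf = alloc_xenheap_pages(2);    /* 1 << 2 = 4 contiguous pages */
    if ( buf == NULL )
        return -ENOMEM;
    memset(buf, 0, PAGE_SIZE << 2);
    /* ... */
    free_xenheap_pages(buf, 2);            /* same order on free */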
* DOMAIN-HEAP SUB-ALLOCATOR
*/
-void init_domheap_pages(unsigned long ps, unsigned long pe)
+void init_domheap_pages(physaddr_t ps, physaddr_t pe)
{
ASSERT(!in_irq());
nr_pages = num_online_cpus() * opt_tbuf_size;
order = get_order(nr_pages * PAGE_SIZE);
- if ( (rawbuf = (char *)alloc_xenheap_pages(order)) == NULL )
+ if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
{
printk("Xen trace buffers: memory allocation failed\n");
return;
struct xmalloc_hdr *hdr;
unsigned long flags;
- hdr = (struct xmalloc_hdr *)alloc_xenheap_pages(0);
+ hdr = alloc_xenheap_page();
if ( hdr == NULL )
return NULL;
struct xmalloc_hdr *hdr;
unsigned int pageorder = get_order(size);
- hdr = (struct xmalloc_hdr *)alloc_xenheap_pages(pageorder);
+ hdr = alloc_xenheap_pages(pageorder);
if ( hdr == NULL )
return NULL;
/* Big allocs free directly. */
if ( hdr->size >= PAGE_SIZE )
{
- free_xenheap_pages((unsigned long)hdr, get_order(hdr->size));
+ free_xenheap_pages(hdr, get_order(hdr->size));
return;
}
if ( hdr->size == PAGE_SIZE )
{
BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
- free_xenheap_pages((unsigned long)hdr, 0);
+ free_xenheap_pages(hdr, 0);
}
else
{
return 0;
order = get_order(bytes);
- debugtrace_buf = (char *)alloc_xenheap_pages(order);
+ debugtrace_buf = alloc_xenheap_pages(order);
ASSERT(debugtrace_buf != NULL);
memset(debugtrace_buf, '\0', bytes);
{
BUG_ON(!port->driver->tx_empty);
if ( !port->txbuf )
- port->txbuf = (char *)alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+ port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
}
/*
} while ( 0 )
extern struct pfn_info *frame_table;
-extern unsigned long frame_table_size;
extern unsigned long max_page;
void init_frametable(void);
#else
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_MASK (~(intpte_t)(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0U)
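Widening PAGE_MASK to intpte_t matters once ptes carry physical addresses above 4GB: if the mask is ever evaluated as a 32-bit unsigned quantity, converting it to 64 bits zero-extends and silently clears the upper frame bits. A worked sketch (values illustrative):

    unsigned int m32 = ~(PAGE_SIZE - 1);              /* 0xfffff000          */
    intpte_t pte  = 0x123456789027ULL;                /* frame > 4GB + flags */
    intpte_t bad  = pte & m32;                        /* 0x56789000: bits 32+ lost */
    intpte_t good = pte & ~(intpte_t)(PAGE_SIZE - 1); /* 0x123456789000      */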
#ifndef __ASSEMBLY__
/* Extract flags into 12-bit integer, or turn 12-bit flags into a pte mask. */
#define get_pte_flags(x) ((int)(x) & 0xFFF)
-#define put_pte_flags(x) ((intpte_t)(x))
+#define put_pte_flags(x) ((intpte_t)((x) & 0xFFF))
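The added mask guarantees that only the 12 flag bits reach the pte; without it, a negative int argument sign-extends through the intpte_t cast and pollutes the upper pte bits. For example:

    int f = -1;          /* all bits set                                 */
    put_pte_flags(f);    /* old: (intpte_t)-1, i.e. all 64 bits set      */
                         /* new: (intpte_t)0xFFF, just the flag bits     */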
#define L1_DISALLOW_MASK (0xFFFFF180U) /* PAT/GLOBAL */
#define L2_DISALLOW_MASK (0xFFFFF180U) /* PSE/GLOBAL */
#define __XEN_MM_H__
#include <xen/config.h>
+#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>
struct pfn_info;
/* Boot-time allocator. Turns into generic allocator after bootstrap. */
-unsigned long init_boot_allocator(unsigned long bitmap_start);
-void init_boot_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
+physaddr_t init_boot_allocator(physaddr_t bitmap_start);
+void init_boot_pages(physaddr_t ps, physaddr_t pe);
+unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
void end_boot_allocator(void);
/* Generic allocator. These functions are *not* interrupt-safe. */
void scrub_heap_pages(void);
/* Xen suballocator. These functions are interrupt-safe. */
-void init_xenheap_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_xenheap_pages(unsigned int order);
-void free_xenheap_pages(unsigned long p, unsigned int order);
+void init_xenheap_pages(physaddr_t ps, physaddr_t pe);
+void *alloc_xenheap_pages(unsigned int order);
+void free_xenheap_pages(void *v, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0))
-#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
+#define free_xenheap_page(v) (free_xenheap_pages(v,0))
/* Domain suballocator. These functions are *not* interrupt-safe. */
-void init_domheap_pages(unsigned long ps, unsigned long pe);
+void init_domheap_pages(physaddr_t ps, physaddr_t pe);
struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
void free_domheap_pages(struct pfn_info *pg, unsigned int order);
unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
-#define free_domheap_page(_p) (free_domheap_pages(_p,0))
+#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
+#define free_domheap_page(p) (free_domheap_pages(p,0))
/* Automatic page scrubbing for dead domains. */
extern struct list_head page_scrub_list;